Loading Libraries


In [1]:
import os 
import numpy as np 
import matplotlib.pyplot as plt 
import tensorflow as tf
import gif
import IPython.display as ipyd
from scipy.misc import imresize
#.misc import imresize
# Unable to install scipy yet

% matplotlib inline
plt.style.use('ggplot')

Settings

Flatten Function


In [2]:
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations
    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.
    """
    with tf.variable_scope('flatten'):
        dims = x.get_shape().as_list()
        if len(dims) == 4:
            flattened = tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        elif len(dims) == 2 or len(dims) == 1:
            flattened = x
        else:
            raise ValueError('Expected n dimensions of 1, 2 or 4.  Found:',
                             len(dims))

        return flattened

Function for Image Splitting


In [3]:
def split_image(img):
    # We'll first collect all the positions in the image in our list, xs
    xs = []

    # And the corresponding colors for each of these positions
    ys = []

    # Now loop over the image
    for row_i in range(img.shape[0]):
        for col_i in range(img.shape[1]):
            # And store the inputs
            xs.append([row_i, col_i])
            # And outputs that the network needs to learn to predict
            ys.append(img[row_i, col_i])

    # we'll convert our lists to arrays
    xs = np.array(xs)
    ys = np.array(ys)
    return xs, ys

Function to Construct a Single Linear Fully Connected Layer


In [4]:
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply
    Returns
    -------
    op : tf.Tensor
        Output of fully connected layer.
    """
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)

    n_input = x.get_shape().as_list()[1]

    with tf.variable_scope(name or "fc", reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())

        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))

        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)

        if activation:
            h = activation(h)

        return h, W

Prepar the Data


In [5]:
# Load the image 
img = plt.imread('emma.png')
img = imresize(img,(100, 100))
# Unable to install scipy

# Split 
xs, ys = split_image(img)


# Normalise 
xs = (xs - np.mean(xs, axis = 0)) / np.std(xs, axis =0)
ys = ys / 255.0
# ys is already normalised for some unknow reason


plt.imshow(ys.reshape(img.shape))


Out[5]:
<matplotlib.image.AxesImage at 0x7fb941a31710>

In [12]:
# Activation settign
input_dimension = 2
output_dimension = 3;
no_neurons = 20;
n_iterations = 100;
batch_size = 100;
learning_rate = 0.001

Reset Graph and Reload Data


In [13]:
# Reset
tf.reset_default_graph()

# inputs 
X =tf.placeholder(tf.float32, shape = [None, 2], name = 'X')

# outputs 
Y = tf.placeholder(tf.float32, shape = [None, 3], name = 'Y')

Constrcut the Network


In [14]:
h1, W1 = linear(X, no_neurons, activation = tf.nn.relu, name = 'layer1')
h2, W2 = linear(h1, no_neurons, activation = tf.nn.relu, name = 'layer2')
h3, W3 = linear(h2, no_neurons, activation = tf.nn.relu, name = 'layer3')
h4, W4 = linear(h3, no_neurons, activation = tf.nn.relu, name = 'layer4')
h5, W5 = linear(h4, no_neurons, activation = tf.nn.relu, name = 'layer5')
h6, W6 = linear(h5, no_neurons, activation = tf.nn.relu, name = 'layer6')
Y_pred, W7 = linear(h6, output_dimension, activation = None, name = 'layer_o')

Define Error Function and Optimiser


In [15]:
# Use l2 cost
cost = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(Y, Y_pred),1))
# Adam optimiser 
optimiser = tf.train.AdamOptimizer(learning_rate).minimize(cost)

TensorBoard Logs

Session


In [18]:
%%time 

sess = tf.Session()

## TF Board 

# Initialize all your variables and run the operation with your session
sess.run(tf.global_variables_initializer())

# Optimize over a few iterations, each time following the gradient
# a little at a time
imgs = []
costs = []
gif_step = n_iterations // 10
step_i = 0

# Saver
saver = tf.train.Saver()
#Writer
writer = tf.summary.FileWriter("./tmp/logs/1")
writer.add_graph(sess.graph)

for it_i in range(n_iterations):
    
    # Get a random sampling of the dataset
    idxs = np.random.permutation(range(len(xs)))
    
    # The number of batches we have to iterate over
    n_batches = len(idxs) // batch_size
    
    # Now iterate over our stochastic minibatches:
    for batch_i in range(n_batches):
         
        # Get just minibatch amount of data
        idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]

        # And optimize, also returning the cost so we can monitor
        # how our optimization is doing.
        training_cost = sess.run(
            [cost, optimiser],
            feed_dict={X: xs[idxs_i], Y: ys[idxs_i]})[0]
        
        
        '''

    # Also, every 20 iterations, we'll draw the prediction of our
    # input xs, which should try to recreate our image!
    if (it_i + 1) % gif_step == 0:
        costs.append(training_cost / n_batches)
        ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
        img = np.clip(ys_pred.reshape(img.shape), 0, 1)
        imgs.append(img)
        # Plot the cost over time
        print('Cost: {}' .format(costs[-1]))

        
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(costs)
        ax[0].set_xlabel('Iteration')
        ax[0].set_ylabel('Cost')
        ax[1].imshow(img)
        fig.suptitle('Iteration {}'.format(it_i))
        plt.show()
        
        '''


CPU times: user 42.1 s, sys: 2.88 s, total: 45 s
Wall time: 29 s

In [19]:
## Save the model 
#filename = saver.save(sess, './tmp/my_model', global_step=n_iterations)
# Save the images as a GIF
gif_name = "nn" + str(no_neurons) + "it" + str(n_iterations) + ".gif"
_ = gif.build_gif(imgs, saveto=gif_name, show_gif=False)
ipyd.Image(url=gif_name,height=200, width=200)


---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-19-4b88870d8c68> in <module>()
      3 # Save the images as a GIF
      4 gif_name = "nn" + str(no_neurons) + "it" + str(n_iterations) + ".gif"
----> 5 _ = gif.build_gif(imgs, saveto=gif_name, show_gif=False)
      6 ipyd.Image(url=gif_name,height=200, width=200)

/home/ppyht2/tf-repos/tf-exercise/ex1. Image Redraw/gif.py in build_gif(imgs, interval, dpi, save_gif, saveto, show_gif, cmap)
     38     """
     39     imgs = np.asarray(imgs)
---> 40     h, w, *c = imgs[0].shape
     41     fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
     42     fig.subplots_adjust(bottom=0)

IndexError: index 0 is out of bounds for axis 0 with size 0

In [ ]: